def show_video(filename):
    """Display the movie ``mc.figpath + filename + mc.vext`` inline.

    The file is read and embedded as a base64 data URI inside an HTML5
    ``<video>`` tag, so the rendered notebook stays self-contained.
    """
    from IPython.display import display, HTML
    from base64 import b64encode
    # Context manager guarantees the file handle is closed even on error
    # (the original open() leaked the handle).
    with open(mc.figpath + filename + mc.vext, "rb") as f:
        video = f.read()
    # Decode to text so the payload embeds cleanly in the HTML attribute
    # (b64encode returns bytes on Python 3).
    video_encoded = b64encode(video).decode('ascii')
    video_tag = '<video controls autoplay="autoplay" loop="loop" width=350px src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
    display(HTML(data=video_tag))
Motion Clouds are synthesized textures which aim at having characteristics similar to those of natural images, but with controlled parameters. There are many ways to achieve such results, and this notebook aims to show that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.
# Set up the MotionClouds frequency grids and a normalized test image.
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
import scipy.misc
# NOTE(review): scipy.misc.lena() was removed in scipy >= 1.0 -- if this
# fails, substitute scipy.misc.face(gray=True) or any grayscale image.
lena = scipy.misc.lena() * 1.  # * 1. promotes the uint8 image to float
# Normalize to zero mean and unit variance.
lena -= lena.mean()
lena /= lena.std()
# print() form works on both Python 2 and 3 (shape is a tuple either way).
print(lena.shape)
imshow(lena, cmap=cm.gray)
def noise(image=None):
    """Return a randomized copy of *image*, circularly rolled by a random
    offset along each axis.

    Rolling is a permutation of the pixels, so the image statistics
    (mean, std, histogram) are preserved exactly; only the spatial phase
    is randomized.

    Parameters
    ----------
    image : ndarray, optional
        Image to randomize. Defaults to the module-level ``lena`` image,
        looked up at call time (sentinel pattern) rather than captured
        as a default argument at definition time.
    """
    if image is None:
        image = lena
    for axis in [0, 1]:
        image = np.roll(image, np.random.randint(image.shape[axis]), axis=axis)
    return image
# Two independent draws of the randomized image: same statistics as the
# original, but different (random) spatial phases each time.
imshow(noise(), cmap=cm.gray)
imshow(noise(), cmap=cm.gray)
Now we define the ARMA process as a recursive averaging process with time constant \(\tau=30\) (in frames).
def ARMA(image, tau=30., noise_fn=None):
    """One step of a first-order autoregressive (AR(1)) process:

        image <- (1 - 1/tau) * image + (1/tau) * noise_fn()

    Parameters
    ----------
    image : ndarray
        Current state of the process.
    tau : float
        Averaging time constant in frames; larger values make the
        texture evolve more slowly.
    noise_fn : callable, optional
        Zero-argument callable producing the innovation term. Defaults
        to the module-level ``noise()`` (a randomly rolled image),
        resolved lazily so the function can be tested or reused with a
        different noise source.

    Returns
    -------
    ndarray
        The updated state.
    """
    if noise_fn is None:
        noise_fn = noise
    return (1 - 1/tau) * image + 1/tau * noise_fn()
Initializing the process with the normalized image:
# Seed the recursive process with the normalized image, then let it run:
# after enough iterations the texture forgets its initial condition.
image = ARMA(lena)
imshow(image, cmap=cm.gray)
# Three checkpoints, 1000 iterations apart, to watch the state evolve.
for _ in range(3):
    for _ in range(1000):
        image = ARMA(image)
    imshow(image, cmap=cm.gray)
# Generate N_frame successive states of the AR(1) process, starting from
# the state reached above, stacked into a 3-D (x, y, t) array.
N_frame = 1024
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame):
    z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
# Rescale from roughly [-1, 1] to [0, 1] before saving, then embed the
# resulting movie in the notebook.
mc.anim_save(.5 + .5*z, filename='results/arma')
show_video(filename='arma')